* first.
*/
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64"", c->cr3);
- /* current!=vcpu as not called by arch_vmx_do_launch */
mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
if( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
goto bad_cr3;
svm_load_cpu_user_regs(v, regs);
}
-static void arch_svm_do_launch(struct vcpu *v)
-{
- svm_do_launch(v);
-
- if ( paging_mode_hap(v->domain) ) {
- v->arch.hvm_svm.vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
- }
-
- reset_stack_and_jump(svm_asm_do_launch);
-}
-
static void svm_ctxt_switch_from(struct vcpu *v)
{
svm_save_dr(v);
svm_restore_dr(v);
}
+static void arch_svm_do_resume(struct vcpu *v)
+{
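+    /*
+     * The VCPU may have been moved to a different physical CPU since it
+     * last ran (launch_core == -1 means it has not run yet); if so,
+     * migrate its timers to the current CPU before resuming.
+     */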
+ if ( v->arch.hvm_svm.launch_core != smp_processor_id() )
+ {
+ v->arch.hvm_svm.launch_core = smp_processor_id();
+ hvm_migrate_timers(v);
+ }
+
+ hvm_do_resume(v);
+ reset_stack_and_jump(svm_asm_do_resume);
+}
+
static int svm_vcpu_initialise(struct vcpu *v)
{
int rc;
- v->arch.schedule_tail = arch_svm_do_launch;
+ v->arch.schedule_tail = arch_svm_do_resume;
v->arch.ctxt_switch_from = svm_ctxt_switch_from;
v->arch.ctxt_switch_to = svm_ctxt_switch_to;
v->arch.hvm_svm.saved_irq_vector = -1;
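+    /* Not yet run on any physical CPU; arch_svm_do_resume() sets this. */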
+ v->arch.hvm_svm.launch_core = -1;
+
if ( (rc = svm_create_vmcb(v)) != 0 )
{
dprintk(XENLOG_WARNING,
/* check CPUID for nested paging support */
cpuid(0x8000000A, &eax, &ebx, &ecx, &edx);
- if ( edx & 0x01 ) { /* nested paging */
+ if ( edx & 0x01 ) /* nested paging */
+ {
hap_capable_system = 1;
}
- else if ( opt_hap_enabled ) {
+ else if ( opt_hap_enabled )
+ {
printk(" nested paging is not supported by this CPU.\n");
hap_capable_system = 0; /* no nested paging, we disable flag. */
}
return 1;
}
-void arch_svm_do_resume(struct vcpu *v)
-{
- /* pinning VCPU to a different core? */
- if ( v->arch.hvm_svm.launch_core == smp_processor_id()) {
- hvm_do_resume( v );
- reset_stack_and_jump( svm_asm_do_resume );
- }
- else {
- if (svm_dbg_on)
- printk("VCPU core pinned: %d to %d\n",
- v->arch.hvm_svm.launch_core, smp_processor_id() );
- v->arch.hvm_svm.launch_core = smp_processor_id();
- hvm_migrate_timers( v );
- hvm_do_resume( v );
- reset_stack_and_jump( svm_asm_do_resume );
- }
-}
-
static int svm_do_nested_pgfault(paddr_t gpa, struct cpu_user_regs *regs)
{
if (mmio_space(gpa)) {
arch_svm->vmcb->exception_intercepts = MONITOR_DEFAULT_EXCEPTION_BITMAP;
- if ( paging_mode_hap(v->domain) ) {
+ if ( paging_mode_hap(v->domain) )
+ {
vmcb->cr0 = arch_svm->cpu_shadow_cr0;
vmcb->np_enable = 1; /* enable nested paging */
vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_PG;
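+        /*
+         * With nested paging enabled, h_cr3 holds the host page table used
+         * to translate guest physical addresses to machine addresses.
+         */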
+ vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
}
return 0;
arch_svm->vmcb = NULL;
}
-void svm_do_launch(struct vcpu *v)
-{
- hvm_stts(v);
-
- /* current core is the one we intend to perform the VMRUN on */
- v->arch.hvm_svm.launch_core = smp_processor_id();
-
- v->arch.schedule_tail = arch_svm_do_resume;
-}
-
static void svm_dump_sel(char *name, svm_segment_register_t *s)
{
printk("%s: sel=0x%04x, attr=0x%04x, limit=0x%08x, base=0x%016llx\n",
popl %eax; \
addl $(NR_SKIPPED_REGS*4), %esp
- ALIGN
-
#define VMRUN .byte 0x0F,0x01,0xD8
#define VMLOAD .byte 0x0F,0x01,0xDA
#define VMSAVE .byte 0x0F,0x01,0xDB
#define STGI .byte 0x0F,0x01,0xDC
#define CLGI .byte 0x0F,0x01,0xDD
-ENTRY(svm_asm_do_launch)
+ENTRY(svm_asm_do_resume)
+ GET_CURRENT(%ebx)
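+        # Test this CPU's softirq_pending word; any pending softirqs are
+        # handled before (re)entering the guest.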
+ xorl %ecx,%ecx
+ notl %ecx
+ cli # tests must not race interrupts
+ movl VCPU_processor(%ebx),%eax
+ shl $IRQSTAT_shift,%eax
+ test %ecx,irq_stat(%eax,1)
+ jnz svm_process_softirqs
+ call svm_intr_assist
+ call svm_load_cr2
+
CLGI
sti
GET_CURRENT(%ebx)
addl $4,%esp
jmp svm_asm_do_resume
- ALIGN
-
-ENTRY(svm_asm_do_resume)
-svm_test_all_events:
- GET_CURRENT(%ebx)
-/*test_all_events:*/
- xorl %ecx,%ecx
- notl %ecx
- cli # tests must not race interrupts
-/*test_softirqs:*/
- movl VCPU_processor(%ebx),%eax
- shl $IRQSTAT_shift,%eax
- test %ecx,irq_stat(%eax,1)
- jnz svm_process_softirqs
-svm_restore_all_guest:
- call svm_intr_assist
- call svm_load_cr2
- /*
- * Check if we are going back to AMD-V based VM
- * By this time, all the setups in the VMCB must be complete.
- */
- jmp svm_asm_do_launch
-
ALIGN
svm_process_softirqs:
sti
call do_softirq
- jmp svm_test_all_events
+ jmp svm_asm_do_resume
#define STGI .byte 0x0F,0x01,0xDC
#define CLGI .byte 0x0F,0x01,0xDD
-ENTRY(svm_asm_do_launch)
+ENTRY(svm_asm_do_resume)
+ GET_CURRENT(%rbx)
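+        # Test this CPU's softirq_pending word; any pending softirqs are
+        # handled before (re)entering the guest.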
+ cli # tests must not race interrupts
+ movl VCPU_processor(%rbx),%eax
+ shl $IRQSTAT_shift, %rax
+ leaq irq_stat(%rip), %rdx
+ testl $~0, (%rdx, %rax, 1)
+ jnz svm_process_softirqs
+ call svm_intr_assist
+ call svm_load_cr2
+
CLGI
sti
GET_CURRENT(%rbx)
call svm_vmexit_handler
jmp svm_asm_do_resume
-ENTRY(svm_asm_do_resume)
-svm_test_all_events:
- GET_CURRENT(%rbx)
-/*test_all_events:*/
- cli # tests must not race interrupts
-/*test_softirqs:*/
- movl VCPU_processor(%rbx),%eax
- shl $IRQSTAT_shift, %rax
- leaq irq_stat(%rip), %rdx
- testl $~0, (%rdx, %rax, 1)
- jnz svm_process_softirqs
-svm_restore_all_guest:
- call svm_intr_assist
- call svm_load_cr2
- /*
- * Check if we are going back to AMD-V based VM
- * By this time, all the setups in the VMCB must be complete.
- */
- jmp svm_asm_do_launch
-
ALIGN
svm_process_softirqs:
sti
call do_softirq
- jmp svm_test_all_events
+ jmp svm_asm_do_resume
#include <asm/i387.h>
extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
-extern void svm_do_launch(struct vcpu *v);
-extern void arch_svm_do_resume(struct vcpu *v);
extern u64 root_vmcb_pa[NR_CPUS];
u32 *msrpm;
u64 vmexit_tsc; /* tsc read at #VMEXIT. for TSC_OFFSET */
int saved_irq_vector;
- u32 launch_core;
+    int launch_core;    /* CPU this VCPU most recently resumed on; -1 = none */
unsigned long flags; /* VMCB flags */
unsigned long cpu_shadow_cr0; /* Guest value for CR0 */